In [1]:
    
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
    
In [3]:
    
import tensorflow as tf
    
In [5]:
    
fire_data = pd.read_excel('../../Dataset/fire_theft.xls')
fire_data.head(5)
    
    
    Out[5]: (table: first five rows of the fire/theft dataset)
In [6]:
    
input_data = fire_data['X']
labels = fire_data['Y']
print(input_data.shape, labels.shape)
    
    
In [7]:
    
def huber_loss(predictions, labels, delta=1.0):
    # Quadratic for residuals within delta, linear beyond it
    difference = tf.abs(labels - predictions)
    condition = tf.less(difference, delta)
    small_res = 0.5 * tf.square(difference)
    large_res = delta * difference - 0.5 * tf.square(delta)
    return tf.where(condition, small_res, large_res)
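    
In [ ]:
    
## Sanity check of huber_loss on hypothetical values (a throwaway evaluation,
## separate from the model below). With delta=1.0 a residual of 0.5 falls in
## the quadratic region (0.5 * 0.5**2 = 0.125) and a residual of 3.0 in the
## linear region (1.0 * 3.0 - 0.5 * 1.0**2 = 2.5).
with tf.Session() as check_sess:
    demo = huber_loss(tf.constant([0.5, 3.0]), tf.constant([0.0, 0.0]))
    print(check_sess.run(demo))  # expected: [0.125  2.5]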
    
In [6]:
    
## Defining the placeholders for data and labels
X = tf.placeholder(tf.float32,name="X")
y = tf.placeholder(tf.float32,name="y")
    
In [7]:
    
## Defining the variables for weights and biases
W = tf.Variable(0.0,name="W")
b = tf.Variable(1.0,name="b")
print(W)
print(b)
    
    
In [8]:
    
## Constructing the model
ypred = tf.add(tf.multiply(X,W),b)
    
In [9]:
    
## Specifying the loss function
loss = tf.square(y - ypred,name="loss")
    
In [10]:
    
## Specifying the optimisers (note: both update the same W and b, so the two
## loss curves below track one shared model rather than two independent fits)
loss_huber = huber_loss(ypred, y)
optimizer_squared = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
optimizer_huber = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss_huber)
    
In [11]:
    
## Training the model
n_samples = input_data.shape[0]
init = tf.global_variables_initializer()
y_loss = []
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    for i in range(1, 101):
        total_loss_sq = 0
        total_loss_huber = 0
        for index in range(n_samples):
            _, l = sess.run([optimizer_squared, loss],
                            feed_dict={X: input_data[index], y: labels[index]})
            _, huber = sess.run([optimizer_huber, loss_huber],
                                feed_dict={X: input_data[index], y: labels[index]})
            total_loss_sq += l
            total_loss_huber += huber
        if i % 10 == 0:
            print('Epoch: {} Squared Loss: {}'.format(i, total_loss_sq / n_samples))
            print('Epoch: {} Huber Loss: {}'.format(i, total_loss_huber / n_samples))
        y_loss.append(total_loss_sq)
    w_cap, b_cap = sess.run([W, b])
    writer.close()
    
    
In [12]:
    
plt.plot(y_loss)
plt.title('Squared Loss vs Epochs')
plt.xlabel('Epoch')
plt.ylabel('Total Squared Loss')
    
    Out[12]: (plot: total squared loss per training epoch)
    
In [13]:
    
#!tensorboard --logdir='./graphs' --port=6006
plt.plot(input_data,labels,'o',label="Real Data")
plt.plot(input_data,w_cap*input_data+b_cap,label="Predicted Data")
plt.legend()
plt.title('Number of Thefts vs Number of Fires')
plt.xlabel('Number of Fires')
plt.ylabel('Number of Thefts')
    
    Out[13]: (plot: scatter of the data with the fitted regression line)
    
In [8]:
    
mnist_data = pd.read_csv('../../Dataset/MNIST/train.csv')
mnist_data.head(3)
    
    Out[8]: (table: first three rows of the MNIST training data)
In [15]:
    
plt.imshow(mnist_data.iloc[0,1:].values.reshape(28,28),cmap='gray')
    
    Out[15]: (image: the first training digit rendered as a 28x28 grayscale array)
    
In [9]:
    
tf.reset_default_graph()
labels = pd.get_dummies(mnist_data['label']).values
input_data = mnist_data.iloc[:,1:]
print(input_data.shape, labels.shape)
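    
In [ ]:
    
## Illustration of the one-hot encoding pd.get_dummies produces (toy labels,
## not the actual MNIST column): each row carries a single 1 in the column of
## its class (the dtype of the indicators varies across pandas versions).
print(pd.get_dummies(pd.Series([0, 2, 1])).values)
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]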
    
    
In [10]:
    
## Defining the placeholders for data
batch_size = 128
X = tf.placeholder(tf.float32,shape=[batch_size,784],name="X")
y = tf.placeholder(tf.float32,shape=[batch_size,10],name="y")
    
In [20]:
    
## Defining the variables for weights and biases
W = tf.get_variable(name="weights",
                    shape=[784, 10],
                    initializer=tf.random_normal_initializer())
b = tf.Variable(tf.zeros([1, 10]), name="biases")
    
In [21]:
    
## Predicting image class using the model (softmax, not sigmoid, normalises
## the 10 class scores into a probability distribution)
logits = tf.matmul(X, W) + b
ypred = tf.nn.softmax(logits)
    
In [22]:
    
## Loss function (the op applies softmax to the logits internally,
## so raw logits are passed in, not ypred)
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(entropy)
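    
In [ ]:
    
## A minimal check (hypothetical logits, separate from the model) that the op
## computes -log(softmax(logits)) at the true class: for logits [2, 1, 0.1]
## with true class 0, softmax gives ~0.659, so the loss should be ~0.417.
demo_logits = tf.constant([[2.0, 1.0, 0.1]])
demo_labels = tf.constant([[1.0, 0.0, 0.0]])
demo_xent = tf.nn.softmax_cross_entropy_with_logits(logits=demo_logits, labels=demo_labels)
with tf.Session() as check_sess:
    print(check_sess.run(demo_xent))  # ~[0.417]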
    
In [23]:
    
## Creating the optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    
In [24]:
    
## Training the model (one pass over the data in minibatches)
init = tf.global_variables_initializer()
y_loss = []
n_examples = input_data.shape[0]
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    for i in range(n_examples // batch_size):
        start = i * batch_size
        X_batch = input_data.iloc[start:start + batch_size]
        y_batch = labels[start:start + batch_size]
        _, l = sess.run([optimizer, loss], feed_dict={X: X_batch, y: y_batch})
        if i % 40 == 0:
            print('Batch: {} Loss: {}'.format(i, l))
        y_loss.append(l)
    writer.close()
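    
In [ ]:
    
## A sketch of how accuracy could be measured: compare the argmax of the
## logits against the argmax of the one-hot labels. This assumes it runs
## inside the training session above (before it closes), with X_batch/y_batch
## holding a held-out batch of the same shape as the placeholders.
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# print(sess.run(accuracy, feed_dict={X: X_batch, y: y_batch}))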
    
    
In [25]:
    
plt.plot(y_loss)
plt.title('Cross-Entropy Loss vs Batches')
plt.xlabel('Batch Number')
plt.ylabel('Cross-Entropy Loss')
    
    Out[25]: (plot: cross-entropy loss per minibatch)
    